# NOTE(review): this chunk is a unified-diff fragment (lines keep their
# +/- patch markers verbatim); the lines below are review annotations,
# not patch text.
#
# Byte offsets into Xen's per-vcpu vcpu_info structure:
#   evtchn_upcall_pending (offset 0): nonzero when an event upcall is pending
#   evtchn_upcall_mask    (offset 1): nonzero masks event delivery
#define evtchn_upcall_pending /* 0 */
#define evtchn_upcall_mask 1
# log2(sizeof(vcpu_info entry)) used to scale a CPU index into the
# vcpu_info[] array in the shared info page -- assumes 8-byte entries;
# TODO confirm against the Xen public interface headers.
+#define sizeof_vcpu_shift 3
+
+#ifdef CONFIG_SMP
# SMP: reg := &shared_info->vcpu_info[TI_cpu(%ebp)]; %ebp must already
# hold the current thread_info pointer at every expansion site.
+#define XEN_GET_VCPU_INFO(reg) movl TI_cpu(%ebp),reg ; \
+ shl $sizeof_vcpu_shift,reg ; \
+ addl HYPERVISOR_shared_info,reg
+#else
# UP: vcpu_info[0] sits at the very start of the shared info page.
+#define XEN_GET_VCPU_INFO(reg) movl HYPERVISOR_shared_info,reg
+#endif
+
# Mask / unmask event-channel delivery for the vcpu whose vcpu_info
# pointer is in (reg).  These are the paravirtual cli/sti equivalents.
#define XEN_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
#define XEN_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
# Patch widens the pending test from bit 0 ($0x1) to the whole byte
# ($0xFF) so any nonzero pending marker is detected.
- #define XEN_TEST_PENDING(reg) testb $0x1,evtchn_upcall_pending(reg)
+ #define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
#ifdef CONFIG_PREEMPT
# preempt_stop: block Xen event delivery before preemption checks.  The
# patch drops the per-config XEN_GET_VCPU_INFO lookup here, so %esi is
# expected to already hold the vcpu_info pointer at every call site --
# NOTE(review): verify that every user of preempt_stop guarantees this.
- #ifdef CONFIG_SMP
- #define preempt_stop GET_THREAD_INFO(%ebp) ; \
- XEN_GET_VCPU_INFO(%esi) ; \
- XEN_BLOCK_EVENTS(%esi)
- #else
- #define preempt_stop XEN_GET_VCPU_INFO(%esi) ; \
- XEN_BLOCK_EVENTS(%esi)
- #endif
+ #define preempt_stop XEN_BLOCK_EVENTS(%esi)
#else
# Without PREEMPT: preempt_stop is a no-op and kernel-mode return goes
# straight to restore_all (matching #endif lies outside this chunk).
#define preempt_stop
#define resume_kernel restore_all
# Tail of SAVE_ALL_NO_EVENTMASK (its opening lines are outside this
# chunk): finishes pushing registers and loads the flat user data segment.
pushl %ebx; \
movl $(__USER_DS), %edx; \
movl %edx, %ds; \
# Trailing semicolon dropped so the macro body ends cleanly.
- movl %edx, %es;
+ movl %edx, %es
+
# SAVE_ALL = SAVE_ALL_NO_EVENTMASK plus a snapshot of the current event
# mask into the EVENT_MASK slot of the saved frame, so return paths can
# restore the interrupted mask state.  XEN_GET_VCPU_INFO replaces the
# raw shared_info load so the correct per-vcpu structure is used on SMP.
+ #define SAVE_ALL \
+ SAVE_ALL_NO_EVENTMASK; \
- movl HYPERVISOR_shared_info, %esi; \
++ XEN_GET_VCPU_INFO(%esi); \
+ movb evtchn_upcall_mask(%esi), %dl; \
+ movb %dl, EVENT_MASK(%esp)
# Opening of RESTORE_INT_REGS; its continuation lines are outside this
# chunk (the fragment below belongs to a different, fused hunk).
#define RESTORE_INT_REGS \
popl %ebx; \
# --- common exception stub with error code (C handler address in %edi) ---
movl %esp, %edx
pushl %esi # push the error code
pushl %edx # push the pt_regs pointer
# Segment selector corrected from __KERNEL_DS to __USER_DS, resolving
# the old "XXXcl USER?" question -- presumably because the Xen guest
# kernel uses the flat user data segment here; confirm against the
# matching SAVE_ALL_NO_EVENTMASK path above, which also loads __USER_DS.
- movl $(__KERNEL_DS), %edx # XXXcl USER?
+ movl $(__USER_DS), %edx
movl %edx, %ds
movl %edx, %es
# Record the current event mask into the saved frame; EVENT_MASK+8
# skips the two words (error code, pt_regs ptr) just pushed.  Per-vcpu
# lookup replaces the raw shared_info[0] load.
- movl HYPERVISOR_shared_info, %esi
++ XEN_GET_VCPU_INFO(%esi)
+ movb evtchn_upcall_mask(%esi), %dl
+ movb %dl, EVENT_MASK+8(%esp)
call *%edi
addl $8, %esp
jmp ret_from_exception
# Hypervisor event upcall dispatch (preamble outside this chunk): if the
# upcall interrupted the scrit..ecrit critical region below, branch to
# critical_region_fixup to repair the partially-restored frame first.
jb 11f
cmpl $ecrit,%eax
jb critical_region_fixup
# 11: locate vcpu_info, clear the saved EVENT_MASK in the frame (events
# are being handled now), pass &pt_regs to evtchn_do_upcall, and return
# via the common ret_from_intr path -- replacing the old hand-rolled
# CS ring test / ret_syscall_tests return.
- 11: push %esp
-11: movl HYPERVISOR_shared_info, %esi
++11: XEN_GET_VCPU_INFO(%esi)
+ movb $0, EVENT_MASK(%esp)
+ push %esp
call evtchn_do_upcall
add $4,%esp
- XEN_GET_VCPU_INFO(%esi)
- movb CS(%esp),%cl
- test $2,%cl # slow return to ring 2 or 3
- jne ret_syscall_tests
+ jmp ret_from_intr
+
+ ALIGN
# restore_all_enable_events: unmask event delivery, then re-test for
# pending events inside the critical region (scrit..ecrit) -- a pending
# event that races with the unmask is caught either by this test or by
# the critical_region_fixup path above.  %esi must hold vcpu_info here.
restore_all_enable_events:
- safesti:XEN_UNBLOCK_EVENTS(%esi) # reenable event callbacks
+ XEN_UNBLOCK_EVENTS(%esi)
scrit: /**** START OF CRITICAL REGION ****/
XEN_TEST_PENDING(%esi)
jnz 14f # process more events if necessary...
# This handler is special, because it gets an extra value on its stack,
# which is the linear faulting address.
# The patch removes the old two-argument PAGE_FAULT_STUB macro...
- #define PAGE_FAULT_STUB(_name1, _name2) \
- ENTRY(_name1) \
- pushl %ds ; \
- pushl %eax ; \
- xorl %eax,%eax ; \
- pushl %ebp ; \
- pushl %edi ; \
- pushl %esi ; \
- pushl %edx ; \
- decl %eax /* eax = -1 */ ; \
- pushl %ecx ; \
- pushl %ebx ; \
- GET_THREAD_INFO(%ebp) ; \
- cld ; \
- movl %es,%ecx ; \
- movl ORIG_EAX(%esp), %esi /* get the error code */ ; \
- movl ES(%esp), %edi /* get the faulting address */ ; \
- movl %eax, ORIG_EAX(%esp) ; \
- movl %ecx, ES(%esp) ; \
- movl %esp,%edx ; \
- pushl %edi /* push the faulting address */ ; \
- pushl %esi /* push the error code */ ; \
- pushl %edx /* push the pt_regs pointer */ ; \
- movl $(__KERNEL_DS),%edx ; \
- movl %edx,%ds ; \
- movl %edx,%es ; \
- call _name2 ; \
- addl $12,%esp ; \
- jmp ret_from_exception ;
- PAGE_FAULT_STUB(page_fault, do_page_fault)
# ...and open-codes it as ENTRY(page_fault): build a pt_regs frame by
# hand, extract the error code and faulting address from the slots the
# hardware/hypervisor pushed, then call
# do_page_fault(pt_regs, error_code, faulting_address).
+ ENTRY(page_fault)
+ pushl %ds
+ pushl %eax
+ xorl %eax,%eax
+ pushl %ebp
+ pushl %edi
+ pushl %esi
+ pushl %edx
+ decl %eax /* eax = -1 */
+ pushl %ecx
+ pushl %ebx
+ GET_THREAD_INFO(%ebp)
+ cld
+ movl %es,%ecx
+ movl ORIG_EAX(%esp), %esi /* get the error code */
+ movl ES(%esp), %edi /* get the faulting address */
+ movl %eax, ORIG_EAX(%esp)
+ movl %ecx, ES(%esp)
+ movl %esp,%edx
+ pushl %edi /* push the faulting address */
+ pushl %esi /* push the error code */
+ pushl %edx /* push the pt_regs pointer */
+ movl $(__KERNEL_DS),%edx
+ movl %edx,%ds
+ movl %edx,%es
# New in this patch vs. the old stub: snapshot the current event mask
# into the frame; EVENT_MASK+12 skips the three words just pushed.
# Per-vcpu lookup replaces the raw HYPERVISOR_shared_info load.
- movl HYPERVISOR_shared_info, %esi
++ XEN_GET_VCPU_INFO(%esi)
+ movb evtchn_upcall_mask(%esi), %dl
+ movb %dl, EVENT_MASK+12(%esp)
+ call do_page_fault
+ addl $12,%esp
+ jmp ret_from_exception
#ifdef CONFIG_X86_MCE
ENTRY(machine_check)